sync w/ head.
tools/libxc/xen/*
tools/misc/miniterm/miniterm
tools/misc/xen_cpuperf
-tools/vnet/vnet-module/.tmp_versions/*
-tools/vnet/vnet-module/.*.cmd
+ tools/vnet/gc
+ tools/vnet/gc*/*
+ tools/vnet/vnet-module/*.ko
++tools/vnet/vnet-module/.*.cmd
++tools/vnet/vnet-module/.tmp_versions/*
+ tools/vnet/vnet-module/vnet_module.mod.*
+ tools/vnetd/vnetd
tools/web-shutdown.tap
+tools/x2d2/minixend
tools/xentrace/xentrace
tools/xfrd/xfrd
xen/arch/x86/asm-offsets.s
}
static inline int do_trap(int trapnr, char *str,
- struct xen_regs *regs,
- long error_code, int use_error_code)
+ struct xen_regs *regs,
+ int use_error_code)
{
- struct domain *d = current;
- struct trap_bounce *tb = &d->thread.trap_bounce;
+ struct exec_domain *ed = current;
+ struct trap_bounce *tb = &ed->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
goto xen_fault;
ti = current->thread.traps + trapnr;
- tb->flags = use_error_code ? TBF_TRAP : TBF_TRAP_NOCODE;
- tb->error_code = error_code;
- tb->cs = ti->cs;
- tb->eip = ti->address;
+ tb->flags = TBF_EXCEPTION;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
+ if ( use_error_code )
+ {
+ tb->flags |= TBF_EXCEPTION_ERRCODE;
+ tb->error_code = regs->error_code;
+ }
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
return 0;
xen_fault:
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
- asmlinkage int do_int3(struct xen_regs *regs, long error_code)
+ asmlinkage int do_int3(struct xen_regs *regs)
{
- struct domain *d = current;
- struct trap_bounce *tb = &d->thread.trap_bounce;
+ struct exec_domain *ed = current;
+ struct trap_bounce *tb = &ed->thread.trap_bounce;
trap_info_t *ti;
- DEBUGGER_trap_entry(TRAP_int3, regs, error_code);
+ DEBUGGER_trap_entry(TRAP_int3, regs);
if ( unlikely((regs->cs & 3) == 0) )
{
}
ti = current->thread.traps + 3;
- tb->flags = TBF_TRAP_NOCODE;
- tb->error_code = error_code;
- tb->cs = ti->cs;
- tb->eip = ti->address;
+ tb->flags = TBF_EXCEPTION;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
return 0;
}
}
if ( (addr < PAGE_OFFSET) &&
- ((error_code & 3) == 3) && /* write-protection fault */
+ ((regs->error_code & 3) == 3) && /* write-protection fault */
ptwr_do_page_fault(addr) )
{
- if ( unlikely(d->mm.shadow_mode) )
+ if ( unlikely(ed->mm.shadow_mode) )
- (void)shadow_fault(addr, error_code);
+ (void)shadow_fault(addr, regs->error_code);
return EXCRET_fault_fixed;
}
}
- if ( unlikely(d->mm.shadow_mode) &&
+ if ( unlikely(ed->mm.shadow_mode) &&
- (addr < PAGE_OFFSET) && shadow_fault(addr, error_code) )
+ (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
return EXCRET_fault_fixed;
- if ( unlikely(addr >= LDT_VIRT_START) &&
- (addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
+ if ( unlikely(addr >= LDT_VIRT_START(ed)) &&
+ (addr < (LDT_VIRT_START(ed) + (ed->mm.ldt_ents*LDT_ENTRY_SIZE))) )
{
/*
* Copy a mapping from the guest's LDT, if it is valid. Otherwise we
if ( unlikely(!(regs->cs & 3)) )
goto xen_fault;
- ti = d->thread.traps + 14;
+ ti = ed->thread.traps + 14;
- tb->flags = TBF_TRAP_CR2; /* page fault pushes %cr2 */
+ tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
tb->cr2 = addr;
- tb->error_code = error_code;
+ tb->error_code = regs->error_code;
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
perfc_incrc(copy_user_faults);
- if ( !d->mm.shadow_mode )
+ if ( !ed->mm.shadow_mode )
- DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup);
+ DPRINTK("Page fault: %08x -> %08lx\n", regs->eip, fixup);
regs->eip = fixup;
return 0;
}
return 0;
}
- asmlinkage int do_general_protection(struct xen_regs *regs, long error_code)
+ asmlinkage int do_general_protection(struct xen_regs *regs)
{
- struct domain *d = current;
- struct trap_bounce *tb = &d->thread.trap_bounce;
+ struct exec_domain *ed = current;
+ struct domain *d = ed->domain;
+ struct trap_bounce *tb = &ed->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
return;
if ( test_and_clear_bit(0, &nmi_softirq_reason) )
- send_guest_virq(dom0, VIRQ_PARITY_ERR);
+ send_guest_virq(dom0->exec_domain[0], VIRQ_PARITY_ERR);
if ( test_and_clear_bit(1, &nmi_softirq_reason) )
- send_guest_virq(dom0, VIRQ_IO_ERR);
+ send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR);
}
- asmlinkage int math_state_restore(struct xen_regs *regs, long error_code)
+ asmlinkage int math_state_restore(struct xen_regs *regs)
{
/* Prevent recursion. */
clts();
restore_fpu(current);
else
init_fpu();
- set_bit(DF_USEDFPU, &current->flags); /* so we fnsave on switch_to() */
+ set_bit(EDF_USEDFPU, &current->ed_flags); /* so we fnsave on switch_to() */
}
- if ( test_and_clear_bit(DF_GUEST_STTS, &current->flags) )
+ if ( test_and_clear_bit(EDF_GUEST_STTS, &current->ed_flags) )
{
struct trap_bounce *tb = &current->thread.trap_bounce;
- tb->flags = TBF_TRAP_NOCODE;
+ tb->flags = TBF_EXCEPTION;
tb->cs = current->thread.traps[7].cs;
tb->eip = current->thread.traps[7].address;
}
return EXCRET_fault_fixed;
}
- asmlinkage int do_debug(struct xen_regs *regs, long error_code)
+ asmlinkage int do_debug(struct xen_regs *regs)
{
unsigned int condition;
- struct domain *d = current;
+ struct exec_domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
- DEBUGGER_trap_entry(TRAP_debug, regs, error_code);
+ DEBUGGER_trap_entry(TRAP_debug, regs);
__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
OFFSET(XREGS_gs, struct xen_regs, gs);
OFFSET(XREGS_ss, struct xen_regs, ss);
OFFSET(XREGS_eflags, struct xen_regs, eflags);
- OFFSET(XREGS_orig_eax, struct xen_regs, orig_eax);
+ OFFSET(XREGS_error_code, struct xen_regs, error_code);
+ OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
+ OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp);
+ DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
BLANK();
- OFFSET(DOMAIN_processor, struct domain, processor);
- OFFSET(DOMAIN_shared_info, struct domain, shared_info);
- OFFSET(DOMAIN_event_sel, struct domain, thread.event_selector);
- OFFSET(DOMAIN_event_addr, struct domain, thread.event_address);
- OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector);
- OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address);
- OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce);
- OFFSET(DOMAIN_thread_flags, struct domain, thread.flags);
+ OFFSET(EDOMAIN_processor, struct exec_domain, processor);
+ OFFSET(EDOMAIN_vcpu_info, struct exec_domain, vcpu_info);
+ OFFSET(EDOMAIN_event_sel, struct exec_domain, thread.event_selector);
+ OFFSET(EDOMAIN_event_addr, struct exec_domain, thread.event_address);
+ OFFSET(EDOMAIN_failsafe_sel, struct exec_domain, thread.failsafe_selector);
+ OFFSET(EDOMAIN_failsafe_addr, struct exec_domain, thread.failsafe_address);
+ OFFSET(EDOMAIN_trap_bounce, struct exec_domain, thread.trap_bounce);
++ OFFSET(EDOMAIN_thread_flags, struct exec_domain, thread.flags);
BLANK();
- OFFSET(SHINFO_upcall_pending, shared_info_t,
- vcpu_data[0].evtchn_upcall_pending);
- OFFSET(SHINFO_upcall_mask, shared_info_t,
- vcpu_data[0].evtchn_upcall_mask);
+ OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
+ OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
BLANK();
OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
/* No special register assumptions */
failsafe_callback:
GET_CURRENT(%ebx)
- andb $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)
- leal DOMAIN_trap_bounce(%ebx),%edx
- movl DOMAIN_failsafe_addr(%ebx),%eax
++ andb $~TF_failsafe_return,EDOMAIN_thread_flags(%ebx)
+ leal EDOMAIN_trap_bounce(%ebx),%edx
+ movl EDOMAIN_failsafe_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
- movl DOMAIN_failsafe_sel(%ebx),%eax
+ movl EDOMAIN_failsafe_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
+ movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
- subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
- movl XREGS_ds(%esp),%eax
- FAULT1: movl %eax,%gs:(%esi)
- movl XREGS_es(%esp),%eax
- FAULT2: movl %eax,%gs:4(%esi)
- movl XREGS_fs(%esp),%eax
- FAULT3: movl %eax,%gs:8(%esi)
- movl XREGS_gs(%esp),%eax
- FAULT4: movl %eax,%gs:12(%esi)
- movl %esi,XREGS_esp(%esp)
popl %ebx
popl %ecx
popl %edx
test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
jnz process_softirqs
/*test_guest_events:*/
- movl DOMAIN_shared_info(%ebx),%eax
- testb $0xFF,SHINFO_upcall_mask(%eax)
+ movl EDOMAIN_vcpu_info(%ebx),%eax
+ testb $0xFF,VCPUINFO_upcall_mask(%eax)
jnz restore_all_guest
- testb $0xFF,SHINFO_upcall_pending(%eax)
+ testb $0xFF,VCPUINFO_upcall_pending(%eax)
jz restore_all_guest
- movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
- leal DOMAIN_trap_bounce(%ebx),%edx
- movl DOMAIN_event_addr(%ebx),%eax
+ leal EDOMAIN_trap_bounce(%ebx),%edx
+ movl EDOMAIN_event_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
- movl DOMAIN_event_sel(%ebx),%eax
+ movl EDOMAIN_event_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
+ movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
- movl DOMAIN_shared_info(%ebx),%eax
- movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
++ movl EDOMAIN_vcpu_info(%ebx),%eax
++ movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
jmp restore_all_guest
ALIGN
/* %edx == trap_bounce, %ebx == task_struct */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
create_bounce_frame:
- mov XREGS_cs+4(%esp),%cl
- test $2,%cl
+ movb XREGS_cs+4(%esp),%cl
+ testb $2,%cl
jz 1f /* jump if returning to an existing ring-1 activation */
/* obtain ss/esp from TSS -- no current ring-1 activations */
- movl DOMAIN_processor(%ebx),%eax
+ movl EDOMAIN_processor(%ebx),%eax
/* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
movl %eax, %ecx
shll $7, %ecx
.previous
ALIGN
- process_guest_exception_and_events:
+ process_guest_exception_and_events:
- leal DOMAIN_trap_bounce(%ebx),%edx
+ leal EDOMAIN_trap_bounce(%ebx),%edx
- testb $~0,TRAPBOUNCE_flags(%edx)
+ testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
jz test_all_events
- call create_bounce_frame # just the basic frame
- mov TRAPBOUNCE_flags(%edx),%cl
- test $TBF_TRAP_NOCODE,%cl
- jnz 2f
- subl $4,%esi # push error_code onto guest frame
- movl TRAPBOUNCE_error_code(%edx),%eax
- FAULT13:movl %eax,%gs:(%esi)
- test $TBF_TRAP_CR2,%cl
- jz 1f
- subl $4,%esi # push %cr2 onto guest frame
- movl TRAPBOUNCE_cr2(%edx),%eax
- FAULT14:movl %eax,%gs:(%esi)
- 1: movl %esi,XREGS_esp(%esp)
- 2: movb $0,TRAPBOUNCE_flags(%edx)
+ call create_bounce_frame
jmp test_all_events
ALIGN